kmem_cache_t *pgd_cache;
kmem_cache_t *pmd_cache;
+kmem_cache_t *pte_cache;
void __init pgtable_cache_init(void)
{
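+ /*
+  * pte pages are exactly one page in size and page-aligned so they can
+  * be pinned as page tables.  The read-only + pin work is done by
+  * pte_ctor/pte_dtor, so it is paid only when pages enter or leave the
+  * slab cache rather than on every pte_alloc_one()/pte_free().
+  */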
+ pte_cache = kmem_cache_create("pte",
+ PTRS_PER_PTE*sizeof(pte_t),
+ PTRS_PER_PTE*sizeof(pte_t),
+ 0,
+ pte_ctor,
+ pte_dtor);
+ if (!pte_cache)
+ panic("pgtable_cache_init(): Cannot create pte cache");
if (PTRS_PER_PMD > 1) {
pmd_cache = kmem_cache_create("pmd",
PTRS_PER_PMD*sizeof(pmd_t),
return pte;
}
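+/*
+ * Slab constructor/destructor for pte pages.  The constructor clears the
+ * page, makes it read-only and queues a pin request so the hypervisor
+ * treats the page as a page table; the destructor unpins the page and
+ * makes it writable again before it goes back to the allocator.
+ */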
+void pte_ctor(void *pte, kmem_cache_t *cache, unsigned long unused)
+{
+ clear_page(pte);
+ __make_page_readonly(pte);
+ queue_pte_pin(virt_to_phys(pte));
+ flush_page_update_queue();
+}
+
+void pte_dtor(void *pte, kmem_cache_t *cache, unsigned long unused)
+{
+ queue_pte_unpin(virt_to_phys(pte));
+ __make_page_writable(pte);
+ flush_page_update_queue();
+}
+
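+/*
+ * With CONFIG_HIGHPTE, try to place the pte page in highmem first; highmem
+ * pte pages are handed out unpinned.  Lowmem pte pages always come from
+ * pte_cache and are already read-only and pinned.
+ */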
struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
- struct page *pte;
+ pte_t *ptep;
#ifdef CONFIG_HIGHPTE
+ struct page *pte;
+
pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT, 0);
-#else
- pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
-#endif
- if (pte) {
-#ifdef CONFIG_HIGHPTE
- void *kaddr = kmap_atomic(pte, KM_USER0);
- clear_page(kaddr);
- kunmap_atomic_force(kaddr, KM_USER0);
-#else
+ if (pte == NULL)
+ return pte;
+ if (pte >= highmem_start_page) {
clear_highpage(pte);
-#endif
-#ifdef CONFIG_HIGHPTE
- if (pte < highmem_start_page)
-#endif
- {
- __make_page_readonly(phys_to_virt(page_to_pseudophys(pte)));
- flush_page_update_queue();
- }
+ return pte;
}
- return pte;
+ /* not a highmem page -- free page and grab one from the cache */
+ __free_page(pte);
+#endif
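+ /* lowmem pte pages come from the pinned slab cache */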
+ ptep = kmem_cache_alloc(pte_cache, GFP_KERNEL);
+ if (ptep)
+ return virt_to_page(ptep);
+ return NULL;
}
void pmd_ctor(void *pmd, kmem_cache_t *cache, unsigned long flags)
#ifdef CONFIG_HIGHPTE
if (pte < highmem_start_page)
#endif
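+ /* lowmem pte pages go back to the cache still read-only and pinned */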
- {
- __make_page_writable(phys_to_virt(page_to_pseudophys(pte)));
+ kmem_cache_free(pte_cache,
+ phys_to_virt(page_to_pseudophys(pte)));
+#ifdef CONFIG_HIGHPTE
+ else
__free_page(pte);
- flush_page_update_queue();
- }
+#endif
}
-#define __pte_free_tlb(tlb,pte) do { \
- tlb_remove_page((tlb),(pte)); \
- flush_page_update_queue(); \
- /* XXXcl queue */ \
-} while (0)
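+/*
+ * Release pte pages through pte_free() so lowmem pages return to the pte
+ * cache (still pinned and read-only) instead of going straight back to
+ * the page allocator via the mmu_gather batching.
+ */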
+#define __pte_free_tlb(tlb,pte) pte_free(pte)
/*
* allocating and freeing a pmd is trivial: the 1-entry pmd is
extern pgd_t swapper_pg_dir[1024];
extern kmem_cache_t *pgd_cache;
extern kmem_cache_t *pmd_cache;
+extern kmem_cache_t *pte_cache;
extern spinlock_t pgd_lock;
extern struct page *pgd_list;
+void pte_ctor(void *, kmem_cache_t *, unsigned long);
+void pte_dtor(void *, kmem_cache_t *, unsigned long);
void pmd_ctor(void *, kmem_cache_t *, unsigned long);
void pgd_ctor(void *, kmem_cache_t *, unsigned long);
void pgd_dtor(void *, kmem_cache_t *, unsigned long);
((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
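+/*
+ * pmd_clear() no longer makes the pte page writable: pte pages now stay
+ * read-only and pinned until they are released through pte_free().
+ */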
#define pmd_clear(xp) do { \
- pmd_t p = *(xp); \
set_pmd(xp, __pmd(0)); \
- __make_page_writable((void *)pmd_page_kernel(p)); \
xen_flush_page_update_queue(); \
} while (0)